#include <xen/spinlock.h>
#include <asm/uaccess.h>
-extern const struct exception_table_entry __start___ex_table[];
-extern const struct exception_table_entry __stop___ex_table[];
-
static inline unsigned long
search_one_table(const struct exception_table_entry *first,
const struct exception_table_entry *last,
unsigned long
search_exception_table(unsigned long addr)
{
- return search_one_table(__start___ex_table, __stop___ex_table-1, addr);
+ extern const struct exception_table_entry __start___ex_table[];
+ extern const struct exception_table_entry __stop___ex_table[];
+ return search_one_table(
+ __start___ex_table, __stop___ex_table-1, addr);
+}
+
+#ifdef __i386__
+unsigned long
+search_pre_exception_table(unsigned long addr)
+{
+ extern const struct exception_table_entry __start___pre_ex_table[];
+ extern const struct exception_table_entry __stop___pre_ex_table[];
+ unsigned long fixup = search_one_table(
+ __start___pre_ex_table, __stop___pre_ex_table-1, addr);
+ DPRINTK("Pre-exception: %08lx -> %08lx\n", addr, fixup);
+ return fixup;
}
+#endif
asmlinkage void do_IRQ(struct xen_regs regs)
{
#if defined(__i386__)
- unsigned int irq = regs.orig_eax;
+ unsigned int irq = regs.entry_vector;
#else
unsigned int irq = 0; /* XXX */
#endif
if ( alert_counter[cpu] == 5*nmi_hz )
{
console_force_unlock();
- fatal_trap(TRAP_nmi, regs, 0);
+ fatal_trap(TRAP_nmi, regs);
}
}
else
(xen_regs->cs & 3) == 3 &&
xen_regs->eip != pdb_system_call_next_addr + 1)
{
- TRC(printf("pdb: user bkpt (0x%x) at 0x%x:0x%lx:0x%lx\n",
+ TRC(printf("pdb: user bkpt (0x%x) at 0x%x:0x%lx:0x%x\n",
exceptionVector, xen_regs->cs & 3, cr3, xen_regs->eip));
return 1;
}
(exceptionVector != KEYPRESS_EXCEPTION) &&
xen_regs->eip < 0xc0000000) /* Linux-specific for now! */
{
- TRC(printf("pdb: user bkpt (0x%x) at 0x%lx:0x%lx\n",
+ TRC(printf("pdb: user bkpt (0x%x) at 0x%lx:0x%x\n",
exceptionVector, cr3, xen_regs->eip));
return 1;
}
- printk("pdb_handle_exception [0x%x][0x%lx:0x%lx]\n",
+ printk("pdb_handle_exception [0x%x][0x%lx:0x%x]\n",
exceptionVector, cr3, xen_regs->eip);
if ( pdb_stepping )
{
d->thread.debugreg[6] = condition;
- tb->flags = TBF_TRAP_NOCODE;
+ tb->flags = TBF_EXCEPTION;
tb->cs = d->thread.traps[1].cs;
tb->eip = d->thread.traps[1].address;
}
void flush_tlb_mask(unsigned long mask)
{
- ASSERT(!in_irq());
+ ASSERT(local_irq_is_enabled());
if ( mask & (1 << smp_processor_id()) )
{
if ( mask != 0 )
{
- /*
- * We are certainly not reentering a flush_lock region on this CPU
- * because we are not in an IRQ context. We can therefore wait for the
- * other guy to release the lock. This is harder than it sounds because
- * local interrupts might be disabled, and he may be waiting for us to
- * execute smp_invalidate_interrupt(). We deal with this possibility by
- * inlining the meat of that function here.
- */
- while ( unlikely(!spin_trylock(&flush_lock)) )
- {
- if ( test_and_clear_bit(smp_processor_id(), &flush_cpumask) )
- local_flush_tlb();
- rep_nop();
- }
+ spin_lock(&flush_lock);
flush_cpumask = mask;
send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
/* Call with no locks held and interrupts enabled (e.g., softirq context). */
void new_tlbflush_clock_period(void)
{
+ ASSERT(local_irq_is_enabled());
+
/* Flush everyone else. We definitely flushed just before entry. */
if ( smp_num_cpus > 1 )
{
gs = __HYPERVISOR_DS;
}
- printk("CPU: %d\nEIP: %04x:[<%08lx>] \nEFLAGS: %08lx\n",
+ printk("CPU: %d\nEIP: %04x:[<%08x>] \nEFLAGS: %08x\n",
smp_processor_id(), 0xffff & regs->cs, regs->eip, regs->eflags);
- printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
+ printk("eax: %08x ebx: %08x ecx: %08x edx: %08x\n",
regs->eax, regs->ebx, regs->ecx, regs->edx);
- printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
+ printk("esi: %08x edi: %08x ebp: %08x esp: %08lx\n",
regs->esi, regs->edi, regs->ebp, esp);
printk("ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
ds, es, fs, gs, ss);
- show_stack(&regs->esp);
+ show_stack((unsigned long *)&regs->esp);
}
/*
* are disabled). In such situations we can't do much that is safe. We try to
* print out some tracing and then we just spin.
*/
-asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs, long error_code)
+asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs)
{
int cpu = smp_processor_id();
+ unsigned long cr2;
static char *trapstr[] = {
"divide error", "debug", "nmi", "bkpt", "overflow", "bounds",
"invalid operation", "device not available", "double fault",
};
show_registers(regs);
+
+ if ( trapnr == TRAP_page_fault )
+ {
+ __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (cr2) : );
+ printk("Faulting linear address might be %08lx\n", cr2);
+ }
+
printk("************************************\n");
- printk("CPU%d FATAL TRAP %d (%s), ERROR_CODE %lx%s.\n",
- cpu, trapnr, trapstr[trapnr], error_code,
+ printk("CPU%d FATAL TRAP %d (%s), ERROR_CODE %04x%s.\n",
+ cpu, trapnr, trapstr[trapnr], regs->error_code,
(regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
printk("System shutting down -- need manual reset.\n");
printk("************************************\n");
}
static inline int do_trap(int trapnr, char *str,
- struct xen_regs *regs,
- long error_code, int use_error_code)
+ struct xen_regs *regs,
+ int use_error_code)
{
struct domain *d = current;
struct trap_bounce *tb = &d->thread.trap_bounce;
trap_info_t *ti;
unsigned long fixup;
- DEBUGGER_trap_entry(trapnr, regs, error_code);
+ DEBUGGER_trap_entry(trapnr, regs);
if ( !(regs->cs & 3) )
goto xen_fault;
ti = current->thread.traps + trapnr;
- tb->flags = use_error_code ? TBF_TRAP : TBF_TRAP_NOCODE;
- tb->error_code = error_code;
- tb->cs = ti->cs;
- tb->eip = ti->address;
+ tb->flags = TBF_EXCEPTION;
+ tb->cs = ti->cs;
+ tb->eip = ti->address;
+ if ( use_error_code )
+ {
+ tb->flags |= TBF_EXCEPTION_ERRCODE;
+ tb->error_code = regs->error_code;
+ }
if ( TI_GET_IF(ti) )
d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
return 0;
if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
- DPRINTK("Trap %d: %08lx -> %08lx\n", trapnr, regs->eip, fixup);
+ DPRINTK("Trap %d: %08x -> %08lx\n", trapnr, regs->eip, fixup);
regs->eip = fixup;
return 0;
}
- DEBUGGER_trap_fatal(trapnr, regs, error_code);
+ DEBUGGER_trap_fatal(trapnr, regs);
show_registers(regs);
panic("CPU%d FATAL TRAP: vector = %d (%s)\n"
- "[error_code=%08x]\n",
- smp_processor_id(), trapnr, str, error_code);
+ "[error_code=%04x]\n",
+ smp_processor_id(), trapnr, str, regs->error_code);
return 0;
}
#define DO_ERROR_NOCODE(trapnr, str, name) \
-asmlinkage int do_##name(struct xen_regs * regs, long error_code) \
+asmlinkage int do_##name(struct xen_regs *regs) \
{ \
- return do_trap(trapnr, str, regs, error_code, 0); \
+ return do_trap(trapnr, str, regs, 0); \
}
#define DO_ERROR(trapnr, str, name) \
-asmlinkage int do_##name(struct xen_regs * regs, long error_code) \
+asmlinkage int do_##name(struct xen_regs *regs) \
{ \
- return do_trap(trapnr, str, regs, error_code, 1); \
+ return do_trap(trapnr, str, regs, 1); \
}
DO_ERROR_NOCODE( 0, "divide error", divide_error)
DO_ERROR(17, "alignment check", alignment_check)
DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error)
-asmlinkage int do_int3(struct xen_regs *regs, long error_code)
+asmlinkage int do_int3(struct xen_regs *regs)
{
struct domain *d = current;
struct trap_bounce *tb = &d->thread.trap_bounce;
trap_info_t *ti;
- DEBUGGER_trap_entry(TRAP_int3, regs, error_code);
+ DEBUGGER_trap_entry(TRAP_int3, regs);
if ( unlikely((regs->cs & 3) == 0) )
{
- DEBUGGER_trap_fatal(TRAP_int3, regs, error_code);
+ DEBUGGER_trap_fatal(TRAP_int3, regs);
show_registers(regs);
- panic("CPU%d FATAL TRAP: vector = 3 (Int3)\n"
- "[error_code=%08x]\n",
- smp_processor_id(), error_code);
+ panic("CPU%d FATAL TRAP: vector = 3 (Int3)\n", smp_processor_id());
}
ti = current->thread.traps + 3;
- tb->flags = TBF_TRAP_NOCODE;
- tb->error_code = error_code;
- tb->cs = ti->cs;
- tb->eip = ti->address;
+ tb->flags = TBF_EXCEPTION;
+ tb->cs = ti->cs;
+ tb->eip = ti->address;
if ( TI_GET_IF(ti) )
d->shared_info->vcpu_data[0].evtchn_upcall_mask = 1;
__asm__ __volatile__ ( "hlt" );
}
-asmlinkage void do_machine_check(struct xen_regs *regs, long error_code)
+asmlinkage void do_machine_check(struct xen_regs *regs)
{
- fatal_trap(TRAP_machine_check, regs, error_code);
+ fatal_trap(TRAP_machine_check, regs);
}
-asmlinkage int do_page_fault(struct xen_regs *regs, long error_code)
+asmlinkage int do_page_fault(struct xen_regs *regs)
{
trap_info_t *ti;
unsigned long off, addr, fixup;
__asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
- DEBUGGER_trap_entry(TRAP_page_fault, regs, error_code);
+ DEBUGGER_trap_entry(TRAP_page_fault, regs);
perfc_incrc(page_faults);
}
if ( (addr < PAGE_OFFSET) &&
- ((error_code & 3) == 3) && /* write-protection fault */
+ ((regs->error_code & 3) == 3) && /* write-protection fault */
ptwr_do_page_fault(addr) )
{
if ( unlikely(d->mm.shadow_mode) )
- (void)shadow_fault(addr, error_code);
+ (void)shadow_fault(addr, regs->error_code);
return EXCRET_fault_fixed;
}
}
if ( unlikely(d->mm.shadow_mode) &&
- (addr < PAGE_OFFSET) && shadow_fault(addr, error_code) )
+ (addr < PAGE_OFFSET) && shadow_fault(addr, regs->error_code) )
return EXCRET_fault_fixed;
if ( unlikely(addr >= LDT_VIRT_START) &&
goto xen_fault;
ti = d->thread.traps + 14;
- tb->flags = TBF_TRAP_CR2; /* page fault pushes %cr2 */
+ tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE | TBF_EXCEPTION_CR2;
tb->cr2 = addr;
- tb->error_code = error_code;
+ tb->error_code = regs->error_code;
tb->cs = ti->cs;
tb->eip = ti->address;
if ( TI_GET_IF(ti) )
{
perfc_incrc(copy_user_faults);
if ( !d->mm.shadow_mode )
- DPRINTK("Page fault: %08lx -> %08lx\n", regs->eip, fixup);
+ DPRINTK("Page fault: %08x -> %08lx\n", regs->eip, fixup);
regs->eip = fixup;
return 0;
}
- DEBUGGER_trap_fatal(TRAP_page_fault, regs, error_code);
+ DEBUGGER_trap_fatal(TRAP_page_fault, regs);
if ( addr >= PAGE_OFFSET )
{
printk(" *pte = %08lx\n", page);
}
#ifdef MEMORY_GUARD
- if ( !(error_code & 1) )
+ if ( !(regs->error_code & 1) )
printk(" -- POSSIBLY AN ACCESS TO FREED MEMORY? --\n");
#endif
}
show_registers(regs);
panic("CPU%d FATAL PAGE FAULT\n"
- "[error_code=%08x]\n"
+ "[error_code=%04x]\n"
"Faulting linear address might be %08lx\n",
- smp_processor_id(), error_code, addr);
+ smp_processor_id(), regs->error_code, addr);
return 0;
}
-asmlinkage int do_general_protection(struct xen_regs *regs, long error_code)
+asmlinkage int do_general_protection(struct xen_regs *regs)
{
struct domain *d = current;
struct trap_bounce *tb = &d->thread.trap_bounce;
trap_info_t *ti;
unsigned long fixup;
- DEBUGGER_trap_entry(TRAP_gp_fault, regs, error_code);
+ DEBUGGER_trap_entry(TRAP_gp_fault, regs);
/* Badness if error in ring 0, or result of an interrupt. */
- if ( !(regs->cs & 3) || (error_code & 1) )
+ if ( !(regs->cs & 3) || (regs->error_code & 1) )
goto gp_in_kernel;
/*
* instruction. The DPL specified by the guest OS for these vectors is NOT
* CHECKED!!
*/
- if ( (error_code & 3) == 2 )
+ if ( (regs->error_code & 3) == 2 )
{
/* This fault must be due to <INT n> instruction. */
- ti = current->thread.traps + (error_code>>3);
+ ti = current->thread.traps + (regs->error_code>>3);
if ( TI_GET_DPL(ti) >= (regs->cs & 3) )
{
- tb->flags = TBF_TRAP_NOCODE;
+ tb->flags = TBF_EXCEPTION;
regs->eip += 2;
goto finish_propagation;
}
#if defined(__i386__)
if ( VM_ASSIST(d, VMASST_TYPE_4gb_segments) &&
- (error_code == 0) &&
+ (regs->error_code == 0) &&
gpf_emulate_4gb(regs) )
return 0;
#endif
/* Pass on GPF as is. */
ti = current->thread.traps + 13;
- tb->flags = TBF_TRAP;
- tb->error_code = error_code;
+ tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
+ tb->error_code = regs->error_code;
finish_propagation:
tb->cs = ti->cs;
tb->eip = ti->address;
if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
{
- DPRINTK("GPF (%04lx): %08lx -> %08lx\n", error_code, regs->eip, fixup);
+ DPRINTK("GPF (%04x): %08x -> %08lx\n",
+ regs->error_code, regs->eip, fixup);
regs->eip = fixup;
return 0;
}
- DEBUGGER_trap_fatal(TRAP_gp_fault, regs, error_code);
+ DEBUGGER_trap_fatal(TRAP_gp_fault, regs);
show_registers(regs);
- panic("CPU%d GENERAL PROTECTION FAULT\n"
- "[error_code=%08x]\n", smp_processor_id(), error_code);
+ panic("CPU%d GENERAL PROTECTION FAULT\n[error_code=%04x]\n",
+ smp_processor_id(), regs->error_code);
return 0;
}
{
console_force_unlock();
printk("\n\nNMI - MEMORY ERROR\n");
- fatal_trap(TRAP_nmi, regs, 0);
+ fatal_trap(TRAP_nmi, regs);
}
asmlinkage void io_check_error(struct xen_regs *regs)
console_force_unlock();
printk("\n\nNMI - I/O ERROR\n");
- fatal_trap(TRAP_nmi, regs, 0);
+ fatal_trap(TRAP_nmi, regs);
}
static void unknown_nmi_error(unsigned char reason, struct xen_regs * regs)
send_guest_virq(dom0, VIRQ_IO_ERR);
}
-asmlinkage int math_state_restore(struct xen_regs *regs, long error_code)
+asmlinkage int math_state_restore(struct xen_regs *regs)
{
/* Prevent recursion. */
clts();
if ( test_and_clear_bit(DF_GUEST_STTS, &current->flags) )
{
struct trap_bounce *tb = &current->thread.trap_bounce;
- tb->flags = TBF_TRAP_NOCODE;
+ tb->flags = TBF_EXCEPTION;
tb->cs = current->thread.traps[7].cs;
tb->eip = current->thread.traps[7].address;
}
return EXCRET_fault_fixed;
}
-asmlinkage int do_debug(struct xen_regs *regs, long error_code)
+asmlinkage int do_debug(struct xen_regs *regs)
{
unsigned int condition;
struct domain *d = current;
struct trap_bounce *tb = &d->thread.trap_bounce;
- DEBUGGER_trap_entry(TRAP_debug, regs, error_code);
+ DEBUGGER_trap_entry(TRAP_debug, regs);
__asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
/* Save debug status register where guest OS can peek at it */
d->thread.debugreg[6] = condition;
- tb->flags = TBF_TRAP_NOCODE;
+ tb->flags = TBF_EXCEPTION;
tb->cs = d->thread.traps[1].cs;
tb->eip = d->thread.traps[1].address;
return EXCRET_not_a_fault;
}
-asmlinkage int do_spurious_interrupt_bug(
- struct xen_regs * regs, long error_code)
+asmlinkage int do_spurious_interrupt_bug(struct xen_regs *regs)
{
return EXCRET_not_a_fault;
}
#include <xen/sched.h>
#define DEFINE(_sym, _val) \
- __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" _val )
+ __asm__ __volatile__ ( "\n->" #_sym " %0 " #_val : : "i" (_val) )
#define BLANK() \
__asm__ __volatile__ ( "\n->" : : )
#define OFFSET(_sym, _str, _mem) \
OFFSET(XREGS_gs, struct xen_regs, gs);
OFFSET(XREGS_ss, struct xen_regs, ss);
OFFSET(XREGS_eflags, struct xen_regs, eflags);
- OFFSET(XREGS_orig_eax, struct xen_regs, orig_eax);
+ OFFSET(XREGS_error_code, struct xen_regs, error_code);
+ OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector);
+ OFFSET(XREGS_kernel_sizeof, struct xen_regs, esp);
+ DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs));
BLANK();
OFFSET(DOMAIN_processor, struct domain, processor);
OFFSET(DOMAIN_failsafe_sel, struct domain, thread.failsafe_selector);
OFFSET(DOMAIN_failsafe_addr, struct domain, thread.failsafe_address);
OFFSET(DOMAIN_trap_bounce, struct domain, thread.trap_bounce);
+ OFFSET(DOMAIN_thread_flags, struct domain, thread.flags);
BLANK();
OFFSET(SHINFO_upcall_pending, shared_info_t,
*
* Copyright (c) 2002-2004, K A Fraser
* Copyright (c) 1991, 1992 Linus Torvalds
- */
-
-/*
- * The idea for callbacks to guest OSes
- * ====================================
- *
+ *
+ * Calling back to a guest OS:
+ * ===========================
+ *
* First, we require that all callbacks (either via a supplied
* interrupt-descriptor-table, or via the special event or failsafe callbacks
* in the shared-info-structure) are to ring 1. This just makes life easier,
* out which the privilege-level of the return code-selector. That code
* would just be a hassle to write, and would need to account for running
* off the end of the GDT/LDT, for example. For all callbacks we check
- * that the provided
- * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe as
- * don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
- * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
- * If not, we load incorrect SS/ESP values from the TSS (for ring 1 rather
- * than the correct ring) and bad things are bound to ensue -- IRET is
+ * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
+ * we're safe as don't allow a guest OS to install ring-0 privileges into the
+ * GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are to
+ * ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
+ * rather than the correct ring) and bad things are bound to ensue -- IRET is
* likely to fault, and we may end up killing the domain (no harm can
* come to Xen, though).
*
ALIGN
restore_all_guest:
-1: movl XREGS_ds(%esp),%ds
-2: movl XREGS_es(%esp),%es
-3: movl XREGS_fs(%esp),%fs
-4: movl XREGS_gs(%esp),%gs
+ testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
+ jnz failsafe_callback
+FLT1: movl XREGS_ds(%esp),%ds
+FLT2: movl XREGS_es(%esp),%es
+FLT3: movl XREGS_fs(%esp),%fs
+FLT4: movl XREGS_gs(%esp),%gs
popl %ebx
- popl %ecx
- popl %edx
- popl %esi
- popl %edi
- popl %ebp
- popl %eax
+ popl %ecx
+ popl %edx
+ popl %esi
+ popl %edi
+ popl %ebp
+ popl %eax
addl $4,%esp
-5: iret
+FLT5: iret
.section .fixup,"ax"
-6: subl $4,%esp
- pushl %eax
- pushl %ebp
- pushl %edi
- pushl %esi
- pushl %edx
- pushl %ecx
- pushl %ebx
-7: SET_XEN_SEGMENTS(a)
- jmp failsafe_callback
+FIX5: subl $28,%esp
+ pushl 28(%esp) # error_code/entry_vector
+ movl %eax,XREGS_eax+4(%esp)
+ movl %ebp,XREGS_ebp+4(%esp)
+ movl %edi,XREGS_edi+4(%esp)
+ movl %esi,XREGS_esi+4(%esp)
+ movl %edx,XREGS_edx+4(%esp)
+ movl %ecx,XREGS_ecx+4(%esp)
+ movl %ebx,XREGS_ebx+4(%esp)
+FIX1: SET_XEN_SEGMENTS(a)
+ movl %eax,%fs
+ movl %eax,%gs
+ sti
+ popl %esi
+ pushfl # EFLAGS
+ movl $__HYPERVISOR_CS,%eax
+ pushl %eax # CS
+ movl $DBLFLT1,%eax
+ pushl %eax # EIP
+ pushl %esi # error_code/entry_vector
+ jmp error_code
+DBLFLT1:GET_CURRENT(%ebx)
+ jmp test_all_events
+DBLFIX1:GET_CURRENT(%ebx)
+ testb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
+ jnz domain_crash # cannot reenter failsafe code
+ orb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
+ jmp test_all_events # will return via failsafe code
+.previous
+.section __pre_ex_table,"a"
+ .long FLT1,FIX1
+ .long FLT2,FIX1
+ .long FLT3,FIX1
+ .long FLT4,FIX1
+ .long FLT5,FIX5
.previous
.section __ex_table,"a"
- .align 4
- .long 1b,7b
- .long 2b,7b
- .long 3b,7b
- .long 4b,7b
- .long 5b,6b
+ .long DBLFLT1,DBLFIX1
.previous
/* No special register assumptions */
failsafe_callback:
GET_CURRENT(%ebx)
+ andb $~TF_failsafe_return,DOMAIN_thread_flags(%ebx)
leal DOMAIN_trap_bounce(%ebx),%edx
movl DOMAIN_failsafe_addr(%ebx),%eax
movl %eax,TRAPBOUNCE_eip(%edx)
movl DOMAIN_failsafe_sel(%ebx),%eax
movw %ax,TRAPBOUNCE_cs(%edx)
+ movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
call create_bounce_frame
- subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
- movl XREGS_ds(%esp),%eax
-FAULT1: movl %eax,%gs:(%esi)
- movl XREGS_es(%esp),%eax
-FAULT2: movl %eax,%gs:4(%esi)
- movl XREGS_fs(%esp),%eax
-FAULT3: movl %eax,%gs:8(%esi)
- movl XREGS_gs(%esp),%eax
-FAULT4: movl %eax,%gs:12(%esi)
- movl %esi,XREGS_esp(%esp)
popl %ebx
popl %ecx
popl %edx
popl %ebp
popl %eax
addl $4,%esp
-FAULT5: iret
+FLT6: iret
+.section .fixup,"ax"
+FIX6: pushl %ebx
+ GET_CURRENT(%ebx)
+ orb $TF_failsafe_return,DOMAIN_thread_flags(%ebx)
+ pop %ebx
+ jmp FIX5
+.section __pre_ex_table,"a"
+ .long FLT6,FIX6
+.previous
ALIGN
restore_all_xen:
ALIGN
ENTRY(hypercall)
- pushl %eax # save orig_eax
+ subl $4,%esp
SAVE_ALL(b)
sti
GET_CURRENT(%ebx)
call *SYMBOL_NAME(hypercall_table)(,%eax,4)
ret_from_hypercall:
- movl %eax,XREGS_eax(%esp) # save the return value
+ movl %eax,XREGS_eax(%esp) # save the return value
test_all_events:
xorl %ecx,%ecx
jnz restore_all_guest
testb $0xFF,SHINFO_upcall_pending(%eax)
jz restore_all_guest
- movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery
/*process_guest_events:*/
leal DOMAIN_trap_bounce(%ebx),%edx
movl DOMAIN_event_addr(%ebx),%eax
movl %eax,TRAPBOUNCE_eip(%edx)
movl DOMAIN_event_sel(%ebx),%eax
movw %ax,TRAPBOUNCE_cs(%edx)
+ movw $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
call create_bounce_frame
+ movl DOMAIN_shared_info(%ebx),%eax
+ movb $1,SHINFO_upcall_mask(%eax) # Upcalls are masked during delivery
jmp restore_all_guest
ALIGN
/* %edx == trap_bounce, %ebx == task_struct */
/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */
create_bounce_frame:
- mov XREGS_cs+4(%esp),%cl
- test $2,%cl
+ movb XREGS_cs+4(%esp),%cl
+ testb $2,%cl
jz 1f /* jump if returning to an existing ring-1 activation */
/* obtain ss/esp from TSS -- no current ring-1 activations */
movl DOMAIN_processor(%ebx),%eax
addl %ecx,%eax
addl $init_tss + 12,%eax
movl (%eax),%esi /* tss->esp1 */
-FAULT6: movl 4(%eax),%gs /* tss->ss1 */
+FLT7: movl 4(%eax),%gs /* tss->ss1 */
/* base of stack frame must contain ss/esp (inter-priv iret) */
subl $8,%esi
movl XREGS_esp+4(%esp),%eax
-FAULT7: movl %eax,%gs:(%esi)
+FLT8: movl %eax,%gs:(%esi)
movl XREGS_ss+4(%esp),%eax
-FAULT8: movl %eax,%gs:4(%esi)
+FLT9: movl %eax,%gs:4(%esi)
jmp 2f
1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
movl XREGS_esp+4(%esp),%esi
-FAULT9: movl XREGS_ss+4(%esp),%gs
+FLT10: movl XREGS_ss+4(%esp),%gs
2: /* Construct a stack frame: EFLAGS, CS/EIP */
subl $12,%esi
movl XREGS_eip+4(%esp),%eax
-FAULT10:movl %eax,%gs:(%esi)
+FLT11: movl %eax,%gs:(%esi)
movl XREGS_cs+4(%esp),%eax
-FAULT11:movl %eax,%gs:4(%esi)
+FLT12: movl %eax,%gs:4(%esi)
movl XREGS_eflags+4(%esp),%eax
-FAULT12:movl %eax,%gs:8(%esi)
+FLT13: movl %eax,%gs:8(%esi)
+ movb TRAPBOUNCE_flags(%edx),%cl
+ test $TBF_EXCEPTION_ERRCODE,%cl
+ jz 1f
+ subl $4,%esi # push error_code onto guest frame
+ movl TRAPBOUNCE_error_code(%edx),%eax
+FLT14: movl %eax,%gs:(%esi)
+ testb $TBF_EXCEPTION_CR2,%cl
+ jz 2f
+ subl $4,%esi # push %cr2 onto guest frame
+ movl TRAPBOUNCE_cr2(%edx),%eax
+FLT15: movl %eax,%gs:(%esi)
+1: testb $TBF_FAILSAFE,%cl
+ jz 2f
+ subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame
+ movl XREGS_ds+4(%esp),%eax
+FLT16: movl %eax,%gs:(%esi)
+ movl XREGS_es+4(%esp),%eax
+FLT17: movl %eax,%gs:4(%esi)
+ movl XREGS_fs+4(%esp),%eax
+FLT18: movl %eax,%gs:8(%esi)
+ movl XREGS_gs+4(%esp),%eax
+FLT19: movl %eax,%gs:12(%esi)
+2: movb $0,TRAPBOUNCE_flags(%edx)
/* Rewrite our stack frame and return to ring 1. */
/* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
- andl $0xfffcbeff,%eax
- movl %eax,XREGS_eflags+4(%esp)
+ andl $0xfffcbeff,XREGS_eflags+4(%esp)
movl %gs,XREGS_ss+4(%esp)
movl %esi,XREGS_esp+4(%esp)
movzwl TRAPBOUNCE_cs(%edx),%eax
movl TRAPBOUNCE_eip(%edx),%eax
movl %eax,XREGS_eip+4(%esp)
ret
-
-.section __ex_table,"a"
- .align 4
- .long FAULT1, crash_domain_fixup3 # Fault writing to ring-1 stack
- .long FAULT2, crash_domain_fixup3 # Fault writing to ring-1 stack
- .long FAULT3, crash_domain_fixup3 # Fault writing to ring-1 stack
- .long FAULT4, crash_domain_fixup3 # Fault writing to ring-1 stack
- .long FAULT5, crash_domain_fixup1 # Fault executing failsafe iret
- .long FAULT6, crash_domain_fixup2 # Fault loading ring-1 stack selector
- .long FAULT7, crash_domain_fixup2 # Fault writing to ring-1 stack
- .long FAULT8, crash_domain_fixup2 # Fault writing to ring-1 stack
- .long FAULT9, crash_domain_fixup2 # Fault loading ring-1 stack selector
- .long FAULT10,crash_domain_fixup2 # Fault writing to ring-1 stack
- .long FAULT11,crash_domain_fixup2 # Fault writing to ring-1 stack
- .long FAULT12,crash_domain_fixup2 # Fault writing to ring-1 stack
- .long FAULT13,crash_domain_fixup3 # Fault writing to ring-1 stack
- .long FAULT14,crash_domain_fixup3 # Fault writing to ring-1 stack
-.previous
-
-# This handler kills domains which experience unrecoverable faults.
.section .fixup,"ax"
-crash_domain_fixup1:
- subl $4,%esp
- SAVE_ALL(a)
- sti
- jmp domain_crash
-crash_domain_fixup2:
- addl $4,%esp
-crash_domain_fixup3:
- jmp domain_crash
+FIX7: sti
+ popl %esi
+ addl $4,%esp # Discard create_b_frame return address
+ pushfl # EFLAGS
+ movl $__HYPERVISOR_CS,%eax
+ pushl %eax # CS
+ movl $DBLFLT2,%eax
+ pushl %eax # EIP
+ pushl %esi # error_code/entry_vector
+ jmp error_code
+DBLFLT2:jmp process_guest_exception_and_events
+.previous
+.section __pre_ex_table,"a"
+ .long FLT7,FIX7
+ .long FLT8,FIX7
+ .long FLT9,FIX7
+ .long FLT10,FIX7
+ .long FLT11,FIX7
+ .long FLT12,FIX7
+ .long FLT13,FIX7
+ .long FLT14,FIX7
+ .long FLT15,FIX7
+ .long FLT16,FIX7
+ .long FLT17,FIX7
+ .long FLT18,FIX7
+ .long FLT19,FIX7
+.previous
+.section __ex_table,"a"
+ .long DBLFLT2,domain_crash
.previous
ALIGN
-process_guest_exception_and_events:
+process_guest_exception_and_events:
leal DOMAIN_trap_bounce(%ebx),%edx
- testb $~0,TRAPBOUNCE_flags(%edx)
+ testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
jz test_all_events
- call create_bounce_frame # just the basic frame
- mov TRAPBOUNCE_flags(%edx),%cl
- test $TBF_TRAP_NOCODE,%cl
- jnz 2f
- subl $4,%esi # push error_code onto guest frame
- movl TRAPBOUNCE_error_code(%edx),%eax
-FAULT13:movl %eax,%gs:(%esi)
- test $TBF_TRAP_CR2,%cl
- jz 1f
- subl $4,%esi # push %cr2 onto guest frame
- movl TRAPBOUNCE_cr2(%edx),%eax
-FAULT14:movl %eax,%gs:(%esi)
-1: movl %esi,XREGS_esp(%esp)
-2: movb $0,TRAPBOUNCE_flags(%edx)
+ call create_bounce_frame
jmp test_all_events
ALIGN
GET_CURRENT(%ebx)
movb XREGS_cs(%esp),%al
testb $3,%al # return to non-supervisor?
- jne test_all_events
+ jnz test_all_events
jmp restore_all_xen
ENTRY(divide_error)
- pushl $0 # no error code
- pushl $ SYMBOL_NAME(do_divide_error)
+ pushl $TRAP_divide_error<<16
ALIGN
error_code:
- cld
- pushl %ebp
- pushl %edi
- pushl %esi
- pushl %edx
- pushl %ecx
- pushl %ebx
- movb XREGS_cs(%esp),%bl
- testb $3,%bl
- je 1f
- movl %ds,XREGS_ds(%esp)
- movl %es,XREGS_es(%esp)
- movl %fs,XREGS_fs(%esp)
- movl %gs,XREGS_gs(%esp)
-1: SET_XEN_SEGMENTS(b)
- movl XREGS_orig_eax(%esp),%esi # get the error code
- movl XREGS_eax(%esp),%edi # get the function address
- movl %eax,XREGS_eax(%esp)
- movl %esp,%edx
- pushl %esi # push the error code
+ SAVE_ALL_NOSEGREGS(a)
+ SET_XEN_SEGMENTS(a)
+ testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp)
+ jz exception_with_ints_disabled
+ sti # re-enable interrupts
+ xorl %eax,%eax
+ movw XREGS_entry_vector(%esp),%ax
+ movl %esp,%edx
pushl %edx # push the xen_regs pointer
GET_CURRENT(%ebx)
- call *%edi
- addl $8,%esp
+ call *SYMBOL_NAME(exception_table)(,%eax,4)
+ addl $4,%esp
movb XREGS_cs(%esp),%al
testb $3,%al
- je restore_all_xen
+ jz restore_all_xen
jmp process_guest_exception_and_events
+exception_with_ints_disabled:
+ movb XREGS_cs(%esp),%al
+ testb $3,%al # interrupts disabled outside Xen?
+ jnz FATAL_exception_with_ints_disabled
+ pushl XREGS_eip(%esp)
+ call search_pre_exception_table
+ addl $4,%esp
+ testl %eax,%eax # no fixup code for faulting EIP?
+ jz FATAL_exception_with_ints_disabled
+ movl %eax,XREGS_eip(%esp)
+ movl %esp,%esi
+ subl $4,%esp
+ movl %esp,%edi
+ movl $XREGS_kernel_sizeof/4,%ecx
+ rep; movsl # make room for error_code/entry_vector
+ movl XREGS_error_code(%esp),%eax # error_code/entry_vector
+ movl %eax,XREGS_kernel_sizeof(%esp)
+ jmp restore_all_xen # return to fixup code
+
+FATAL_exception_with_ints_disabled:
+ xorl %esi,%esi
+ movw XREGS_entry_vector(%esp),%si
+ movl %esp,%edx
+ pushl %edx # push the xen_regs pointer
+ pushl %esi # push the trapnr (entry vector)
+ call SYMBOL_NAME(fatal_trap)
+ ud2
+
ENTRY(coprocessor_error)
- pushl $0
- pushl $ SYMBOL_NAME(do_coprocessor_error)
+ pushl $TRAP_copro_error<<16
jmp error_code
ENTRY(simd_coprocessor_error)
- pushl $0
- pushl $ SYMBOL_NAME(do_simd_coprocessor_error)
+ pushl $TRAP_simd_error<<16
jmp error_code
ENTRY(device_not_available)
- pushl $0
- pushl $SYMBOL_NAME(math_state_restore)
+ pushl $TRAP_no_device<<16
jmp error_code
ENTRY(debug)
- pushl $0
- pushl $ SYMBOL_NAME(do_debug)
+ pushl $TRAP_debug<<16
jmp error_code
ENTRY(int3)
- pushl $0
- pushl $ SYMBOL_NAME(do_int3)
+ pushl $TRAP_int3<<16
jmp error_code
ENTRY(overflow)
- pushl $0
- pushl $ SYMBOL_NAME(do_overflow)
+ pushl $TRAP_overflow<<16
jmp error_code
ENTRY(bounds)
- pushl $0
- pushl $ SYMBOL_NAME(do_bounds)
+ pushl $TRAP_bounds<<16
jmp error_code
ENTRY(invalid_op)
- pushl $0
- pushl $ SYMBOL_NAME(do_invalid_op)
+ pushl $TRAP_invalid_op<<16
jmp error_code
ENTRY(coprocessor_segment_overrun)
- pushl $0
- pushl $ SYMBOL_NAME(do_coprocessor_segment_overrun)
+ pushl $TRAP_copro_seg<<16
jmp error_code
ENTRY(invalid_TSS)
- pushl $ SYMBOL_NAME(do_invalid_TSS)
+ movw $TRAP_invalid_tss,2(%esp)
jmp error_code
ENTRY(segment_not_present)
- pushl $ SYMBOL_NAME(do_segment_not_present)
+ movw $TRAP_no_segment,2(%esp)
jmp error_code
ENTRY(stack_segment)
- pushl $ SYMBOL_NAME(do_stack_segment)
+ movw $TRAP_stack_error,2(%esp)
jmp error_code
ENTRY(general_protection)
- pushl $ SYMBOL_NAME(do_general_protection)
+ movw $TRAP_gp_fault,2(%esp)
jmp error_code
ENTRY(alignment_check)
- pushl $ SYMBOL_NAME(do_alignment_check)
+ movw $TRAP_alignment_check,2(%esp)
jmp error_code
ENTRY(page_fault)
- pushl $ SYMBOL_NAME(do_page_fault)
+ movw $TRAP_page_fault,2(%esp)
jmp error_code
ENTRY(machine_check)
- pushl $0
- pushl $ SYMBOL_NAME(do_machine_check)
+ pushl $TRAP_machine_check<<16
jmp error_code
ENTRY(spurious_interrupt_bug)
- pushl $0
- pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
+ pushl $TRAP_spurious_int<<16
jmp error_code
ENTRY(nmi)
# epilogue code.
movb XREGS_cs(%esp),%al
testb $3,%al
- jne do_watchdog_tick
+ jnz do_watchdog_tick
movl XREGS_ds(%esp),%eax
cmpw $(__HYPERVISOR_DS),%ax
jne restore_all_xen
addl $8,%esp
movb XREGS_cs(%esp),%al
testb $3,%al
- je restore_all_xen
+ jz restore_all_xen
GET_CURRENT(%ebx)
jmp restore_all_guest
jmp ret_from_intr
.data
+
+ENTRY(exception_table)
+ .long SYMBOL_NAME(do_divide_error)
+ .long SYMBOL_NAME(do_debug)
+ .long 0 # nmi
+ .long SYMBOL_NAME(do_int3)
+ .long SYMBOL_NAME(do_overflow)
+ .long SYMBOL_NAME(do_bounds)
+ .long SYMBOL_NAME(do_invalid_op)
+ .long SYMBOL_NAME(math_state_restore)
+ .long 0 # double fault
+ .long SYMBOL_NAME(do_coprocessor_segment_overrun)
+ .long SYMBOL_NAME(do_invalid_TSS)
+ .long SYMBOL_NAME(do_segment_not_present)
+ .long SYMBOL_NAME(do_stack_segment)
+ .long SYMBOL_NAME(do_general_protection)
+ .long SYMBOL_NAME(do_page_fault)
+ .long SYMBOL_NAME(do_spurious_interrupt_bug)
+ .long SYMBOL_NAME(do_coprocessor_error)
+ .long SYMBOL_NAME(do_alignment_check)
+ .long SYMBOL_NAME(do_machine_check)
+ .long SYMBOL_NAME(do_simd_coprocessor_error)
+
ENTRY(hypercall_table)
.long SYMBOL_NAME(do_set_trap_table) /* 0 */
.long SYMBOL_NAME(do_mmu_update)
{
ti = &d->thread.traps[15];
tb = &d->thread.trap_bounce;
- tb->flags = TBF_TRAP;
+ tb->flags = TBF_EXCEPTION | TBF_EXCEPTION_ERRCODE;
tb->error_code = pb - eip;
tb->cs = ti->cs;
tb->eip = ti->address;
/* ld script to make i386 Linux kernel
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
- * Modifified for i386 Xen by Keir Fraser
+ * Modified for i386 Xen by Keir Fraser
*/
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
__ex_table : { *(__ex_table) } :text
__stop___ex_table = .;
+ . = ALIGN(16); /* Pre-exception table */
+ __start___pre_ex_table = .;
+ __pre_ex_table : { *(__pre_ex_table) } :text
+ __stop___pre_ex_table = .;
+
__start___ksymtab = .; /* Kernel symbol table */
__ksymtab : { *(__ksymtab) } :text
__stop___ksymtab = .;
#include <asm/processor.h>
/* The main trap handlers use these helper macros which include early bail. */
-#define DEBUGGER_trap_entry(_v, _r, _e) \
- if ( debugger_trap_entry(_v, _r, _e) ) return EXCRET_fault_fixed;
-#define DEBUGGER_trap_fatal(_v, _r, _e) \
- if ( debugger_trap_fatal(_v, _r, _e) ) return EXCRET_fault_fixed;
+#define DEBUGGER_trap_entry(_v, _r) \
+ if ( debugger_trap_entry(_v, _r) ) return EXCRET_fault_fixed;
+#define DEBUGGER_trap_fatal(_v, _r) \
+ if ( debugger_trap_fatal(_v, _r) ) return EXCRET_fault_fixed;
#ifdef XEN_DEBUGGER
#include <asm/pdb.h>
static inline int debugger_trap_entry(
- unsigned int vector, struct xen_regs *regs, unsigned int error_code)
+ unsigned int vector, struct xen_regs *regs)
{
int ret = 0;
case TRAP_debug:
if ( pdb_initialized )
{
- pdb_handle_debug_trap(regs, (long)error_code);
+ pdb_handle_debug_trap(regs, regs->error_code);
ret = 1; /* early exit */
}
break;
break;
case TRAP_gp_fault:
- if ( ((regs->cs & 3) != 0) && ((error_code & 3) == 2) &&
+ if ( ((regs->cs & 3) != 0) && ((regs->error_code & 3) == 2) &&
pdb_initialized && (pdb_ctx.system_call != 0) )
{
unsigned long cr3 = read_cr3();
if ( cr3 == pdb_ctx.ptbr )
pdb_linux_syscall_enter_bkpt(
- regs, error_code, current->thread.traps + (error_code>>3));
+ regs, regs->error_code,
+ current->thread.traps + (regs->error_code>>3));
}
break;
}
}
static inline int debugger_trap_fatal(
- unsigned int vector, struct xen_regs *regs, unsigned int error_code)
+ unsigned int vector, struct xen_regs *regs)
{
int ret = 0;
extern int kdb_trap(int, int, struct xen_regs *);
static inline int debugger_trap_entry(
- unsigned int vector, struct xen_regs *regs, unsigned int error_code)
+ unsigned int vector, struct xen_regs *regs)
{
return 0;
}
static inline int debugger_trap_fatal(
- unsigned int vector, struct xen_regs *regs, unsigned int error_code)
+ unsigned int vector, struct xen_regs *regs)
{
return kdb_trap(vector, 0, regs);
}
#else
-#define debugger_trap_entry(_v, _r, _e) (0)
-#define debugger_trap_fatal(_v, _r, _e) (0)
+#define debugger_trap_entry(_v, _r) (0)
+#define debugger_trap_fatal(_v, _r) (0)
#endif
__asm__( \
"\n"__ALIGN_STR"\n" \
SYMBOL_NAME_STR(x) ":\n\t" \
- "push"__OS" $"#v"\n\t" \
+ "push"__OS" $"#v"<<16\n\t" \
SAVE_ALL(a) \
SYMBOL_NAME_STR(call_##x)":\n\t" \
"call "SYMBOL_NAME_STR(smp_##x)"\n\t" \
__asm__( \
"\n"__ALIGN_STR"\n" \
SYMBOL_NAME_STR(x) ":\n\t" \
- "push"__OS" $"#v"\n\t" \
+ "push"__OS" $"#v"<<16\n\t" \
SAVE_ALL(a) \
"mov %"__OP"sp,%"__OP"ax\n\t" \
"push %"__OP"ax\n\t" \
__asm__( \
"\n"__ALIGN_STR"\n" \
SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
- "push"__OS" $"#nr"\n\t" \
+ "push"__OS" $"#nr"<<16\n\t" \
"jmp common_interrupt");
extern unsigned long prof_cpu_mask;
/*
* 'trap_bounce' flags values.
*/
-#define TBF_TRAP 1
-#define TBF_TRAP_NOCODE 2
-#define TBF_TRAP_CR2 4
+#define TBF_EXCEPTION 1
+#define TBF_EXCEPTION_ERRCODE 2
+#define TBF_EXCEPTION_CR2 4
+#define TBF_INTERRUPT 8
+#define TBF_FAILSAFE 16
+
+/*
+ * thread.flags values.
+ */
+#define TF_failsafe_return 1
#ifndef __ASSEMBLY__
unsigned long guestos_sp;
unsigned long guestos_ss;
+ unsigned long flags; /* TF_ */
+
/* Hardware debugging registers */
unsigned long debugreg[8]; /* %%db0-7 debug registers */
void show_trace(unsigned long *esp);
void show_stack(unsigned long *esp);
void show_registers(struct xen_regs *regs);
-asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs, long error_code);
+asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs);
#endif /* !__ASSEMBLY__ */
#ifndef _I386_REGS_H
#define _I386_REGS_H
+#include <xen/types.h>
+
struct xen_regs
{
/* All saved activations contain the following fields. */
- long ebx;
- long ecx;
- long edx;
- long esi;
- long edi;
- long ebp;
- long eax;
- long orig_eax;
- long eip;
- int cs;
- long eflags;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+ u32 esi;
+ u32 edi;
+ u32 ebp;
+ u32 eax;
+ u16 error_code;
+ u16 entry_vector;
+ u32 eip;
+ u32 cs;
+ u32 eflags;
/* Only saved guest activations contain the following fields. */
- long esp;
- int ss;
- int es;
- int ds;
- int fs;
- int gs;
-};
+ u32 esp;
+ u32 ss;
+ u32 es;
+ u32 ds;
+ u32 fs;
+ u32 gs;
+} __attribute__ ((packed));
enum EFLAGS {
EF_CF = 0x00000001,